library(lmerTest)
library(ggeffects)
library(dplyr)
library(report)
package ‘report’ was built under R version 4.0.5
library(r2glmm)
library(performance)
package ‘performance’ was built under R version 4.0.5
Attaching package: ‘performance’

The following objects are masked from ‘package:sjstats’:

    icc, r2
library(corrr)
library(broom.mixed)
# Load the cleaned test- and train-phase data sets.
fullTest  <- read.csv("../Cleaning/output/fullTest.csv")
fullTrain <- read.csv("../Cleaning/output/fullTrain.csv")

# Per-trait choice frequencies; expose optionChoiceN under the name "props".
traitsFreqs <- read.csv("../Cleaning/output/traitFreqOverUnder.csv")
names(traitsFreqs)[names(traitsFreqs) == "optionChoiceN"] <- "props"
fullTest <- merge(fullTest, traitsFreqs[c("trait", "props")], by = "trait")
# For underestimators the relevant proportion is the complement of props.
fullTest$propCorr <- ifelse(fullTest$Estimator == "Underestimator",
                            1 - fullTest$props, fullTest$props)

# Subject IDs present in the test phase.
uSubs <- unique(fullTest$subID)

# One row per subject, for individual-difference analyses.
indDiffs <- fullTest[!duplicated(fullTest$subID),]
fullTest$ingChoiceN <- as.factor(fullTest$ingChoiceN)
fullTest$novel <- as.factor(fullTest$novel)
# Z-score the continuous predictors (scale() returns a 1-column matrix).
fullTest$selfResp.Z <- scale(fullTest$selfResp)
fullTest$SE.Z <- scale(fullTest$SE)
fullTest$iSE.Z <- scale(fullTest$iSE)
fullTest$oSE.Z <- scale(fullTest$oSE)
fullTest$predicted.Z <- scale(fullTest$predicted)
fullTest$slope.Z <- scale(fullTest$slope)
fullTest$entropy.Z <- scale(fullTest$entropy)
fullTest$WSR.Z <- scale(fullTest$WSR)
fullTest$neighAveOutSE.Z <- scale(fullTest$neighAveOutSE)
fullTest$neighAveAllSE.Z <- scale(fullTest$neighAveAllSE)
fullTest$neighAveInSE.Z <- scale(fullTest$neighAveInSE)
# Relabel the novel factor for plotting. (The original repeated the
# as.factor(fullTest$novel) conversion here; the duplicate call was removed
# since novel is already a factor from the conversion above.)
levels(fullTest$novel) <- list("Trained"  = "0", "Held Out" = "1")
# Wide matrix of training-phase self-evaluations: one row per trait (Idx 1-148),
# one column per subject named "e<subID>". all.x keeps every Idx row, so traits
# a subject never rated stay NA.
evalMat <- data.frame(Idx = 1:148)
for (s in uSubs) {
  isSub <- fullTrain$subID == s
  cur <- data.frame(fullTrain$Idx[isSub], fullTrain$selfResp[isSub])
  names(cur) <- c("Idx", paste0("e", s))
  evalMat <- merge(evalMat, cur, by = "Idx", all.x = TRUE)
}
# 3-dimensional classical MDS over the subject-evaluation columns (all columns
# except Idx); distances are computed between traits (rows of evalMat).
mdsPoints <- cmdscale(dist(evalMat[-1]), eig = TRUE, k = 3)$points
MDSframe <- data.frame(Idx = 1:148, MDS = mdsPoints)
fullTest <- merge(fullTest, MDSframe, by = "Idx")
library(ggpubr)
Loading required package: ggplot2
Registered S3 methods overwritten by 'broom':
  method            from  
  tidy.glht         jtools
  tidy.summary.glht jtools
Registered S3 method overwritten by 'data.table':
  method           from
  print.data.table     
Registered S3 methods overwritten by 'car':
  method                          from
  influence.merMod                lme4
  cooks.distance.influence.merMod lme4
  dfbeta.influence.merMod         lme4
  dfbetas.influence.merMod        lme4
# Long-format per-subject 1-D MDS of training-phase self-evaluations.
# The original initialized a 0-row matrix and then assigned 1:148 into it
# (an error in R: no rows to fill), and rbind-ed rows whose columns were named
# subID/Idx/MDS onto a frame named subID/Idx/eval (a column-name mismatch).
# Accumulate per-subject frames in a list and bind once at the end instead.
evalList <- vector("list", length(uSubs))
for (j in seq_along(uSubs)) {
  i <- uSubs[j]
  eval <- fullTrain$selfResp[fullTrain$subID == i]
  present <- which(!is.na(eval))  # keep only traits this subject actually rated
  Idx <- fullTrain$Idx[fullTrain$subID == i][present]
  eval <- eval[present]
  # 1-D classical MDS of the pairwise distances among this subject's ratings.
  evalList[[j]] <- data.frame(
    subID = i,
    Idx = Idx,
    eval = cmdscale(dist(eval), eig = TRUE, k = 1)$points[, 1]
  )
}
evalMat <- do.call(rbind, evalList)
# prop.test(traitsFreqs$optionChoiceN, traitsFreqs$N, p=rep(.5,length(traitsFreqs$N)))
# 
# m <- glmer( ingChoiceN ~ trait + ( 1 | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
#                                     optCtrl = list(maxfun = 100000)),
#     nAGQ = 1)
# 
# fullTest$trait <- as.factor(fullTest$trait)
# contrasts(fullTest$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ trait, family = binomial,
#           data = fullTest
#          )
# summary(m)
# traitsFreqs$trait <- as.factor(traitsFreqs$trait)
# contrasts(traitsFreqs$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ 1, family = binomial,
#           data = traitsFreqs
#          )
# summary(m)
# check_overdispersion(m)
# check_model(m)
# 
# m <- glm(optionChoiceN ~ trait, family = quasibinomial,
#           data = traitsFreqs
#          )
# check_overdispersion(m)
# check_model(m)
# 
# t.test()
# 
# m <- glm(optionChoiceN ~ trait, family = poisson,
#           data = traitsFreqs
#          )
# check_overdispersion(m)
# check_model(m)
# One-sample t-test per trait: does the ingroup-choice rate differ from 50%?
# Derive the trait count from the data instead of hard-coding 148.
nTraits <- nrow(traitsFreqs)
propMatrix <- matrix(nrow = nTraits, ncol = 7)
for (i in seq_len(nTraits)) {
    traitDf <- subset(fullTest, Idx==i)
    # ingChoiceN is a factor with levels "0"/"1"; as.numeric() yields 1/2,
    # so subtract 1 to recover the 0/1 coding before testing against .50.
    test <- t.test(as.numeric(traitDf$ingChoiceN)-1, mu=.50)
    # conf.int contributes two values (LCI, UCI), giving 7 columns total.
    propMatrix[i, ] <- c(i, test$statistic, test$p.value, test$conf.int, test$estimate, test$parameter)
}
colnames(propMatrix) <- c("Idx", "stat", "p", "LCI", "UCI", "est", "param")
propMatrix <- as.data.frame(propMatrix)
# NOTE(review): assumes traitsFreqs rows are ordered by Idx 1..nTraits so that
# the trait labels line up with the test rows — verify against the cleaning step.
propMatrix$trait <- traitsFreqs$trait
propMatrix <- propMatrix[order(propMatrix$p),]
# Correlate groupHomoph with the other individual-difference measures
# (seHomoph and the DS:SING.Inter column range), keeping only the
# groupHomoph column of the correlation matrix, sorted ascending.
x <- indDiffs %>% 
    select(groupHomoph, seHomoph, DS:SING.Inter) %>%
  correlate() %>% 
  focus(groupHomoph) %>%
    arrange(groupHomoph)

# Bar chart of the correlation coefficients, ordered by strength.
x %>% 
  mutate(rowname = factor(rowname, levels = rowname[order(groupHomoph)])) %>%  # Order by correlation strength
  ggplot(aes(x = rowname, y = groupHomoph)) +
    geom_bar(stat = "identity") +
    ylab("Correlation Coefficient") +
    # NOTE(review): axis.text.x is specified twice; the second theme() call
    # below (size 9, angle 45) overrides the angle-90 setting on this line.
    xlab("Individual Differences") + theme_grey(base_size = 9)  + theme(axis.text.x = element_text(angle = 90,hjust = 1)) +
  theme(axis.text.x = element_text( 
                           size = 9, angle = 45, vjust = 1)) + theme(axis.title.x = element_text(vjust=1.9)) + theme(axis.text=element_text(size=9),
        axis.title=element_text(size=9,face="bold")) + theme(legend.text = element_text(size=9)) + theme(panel.border = element_rect(colour = "black", fill = NA, size =1)) + theme(legend.title = element_blank()) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))

Familiarity predicts Reaction Time

# LMM: does trait familiarity predict log reaction time? Random familiarity
# slopes by subject, random intercepts by trait.
m <- lmer( log(RT) ~ fam + ( fam | subID) + ( 1 | trait), data = fullTest)
boundary (singular) fit: see help('isSingular')
summary(m)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: log(RT) ~ fam + (fam | subID) + (1 | trait)
   Data: fullTest

REML criterion at convergence: 17308.9

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-8.2343 -0.5104 -0.0991  0.4294  7.7147 

Random effects:
 Groups   Name        Variance  Std.Dev. Corr
 trait    (Intercept) 3.815e-03 0.061765     
 subID    (Intercept) 6.007e-01 0.775047     
          fam         1.576e-07 0.000397 1.00
 Residual             8.561e-01 0.925263     
Number of obs: 6364, groups:  trait, 148; subID, 43

Fixed effects:
             Estimate Std. Error        df t value Pr(>|t|)   
(Intercept) 4.215e-02  1.320e-01 4.521e+01   0.319  0.75098   
fam         7.134e-03  2.574e-03 1.535e+02   2.771  0.00627 **
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
    (Intr)
fam -0.414
optimizer (nloptwrap) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("fam")) %>% plot()
Model has log-transformed response. Back-transforming predictions to original response scale. Standard errors are still on the log-scale.

tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
m <- lmer( log(RT) ~ fam + propCorr + desirability + ( fam | subID) + ( 1 | trait), data = fullTest)
boundary (singular) fit: see help('isSingular')
Model failed to converge with 1 negative eigenvalue: -2.4e+03
summary(m)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: log(RT) ~ fam + propCorr + desirability + (fam | subID) + (1 |      trait)
   Data: fullTest

REML criterion at convergence: 17319

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-8.2458 -0.5121 -0.0995  0.4323  7.7059 

Random effects:
 Groups   Name        Variance  Std.Dev.  Corr
 trait    (Intercept) 1.993e-06 0.0014116     
 subID    (Intercept) 5.231e-01 0.7232289     
          fam         9.680e-07 0.0009839 1.00
 Residual             8.607e-01 0.9277194     
Number of obs: 6364, groups:  trait, 148; subID, 43

Fixed effects:
               Estimate Std. Error         df t value Pr(>|t|)   
(Intercept)   4.221e-02  1.910e-01  2.895e+02   0.221  0.82525   
fam           7.534e-03  2.602e-03  3.434e+03   2.896  0.00381 **
propCorr      7.677e-02  8.447e-02  6.292e+03   0.909  0.36349   
desirability -8.251e-03  2.842e-02  6.316e+03  -0.290  0.77157   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) fam    prpCrr
fam          0.068              
propCorr    -0.206  0.059       
desirabilty -0.728 -0.407 -0.041
optimizer (nloptwrap) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("fam")) %>% plot()
Model has log-transformed response. Back-transforming predictions to original response scale. Standard errors are still on the log-scale.

tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Replication of prior self-anchoring findings: Self-evaluations predicting ingroup evaluations

No covariates

# Logistic GLMM: z-scored self-descriptiveness predicting ingroup choice,
# with random selfResp.Z slopes by subject and random intercepts by trait.
# bobyqa optimizer with a raised evaluation cap to aid convergence.
m <- glmer( ingChoiceN ~ selfResp.Z +  ( selfResp.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
m <- glmer( ingChoiceN ~ selfResp.Z +  ( selfResp.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ selfResp.Z + (selfResp.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4927.2   4964.6  -2457.6   4915.2     3761 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8028 -1.0018  0.5326  0.8867  2.9204 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 4.556e-16 2.134e-08     
 subID  (Intercept) 3.259e-01 5.709e-01     
        selfResp.Z  1.153e-01 3.396e-01 0.54
Number of obs: 3767, groups:  trait, 148; subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept)  0.27860    0.09480   2.939  0.00329 ** 
selfResp.Z   0.26859    0.06597   4.071 4.68e-05 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
           (Intr)
selfResp.Z 0.397 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

m <- glmer( ingChoiceN ~ selfResp.Z + propCorr + desirability + ( selfResp.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ selfResp.Z + propCorr + desirability + (selfResp.Z |  
    subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4587.1   4636.9  -2285.5   4571.1     3759 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.5860 -0.8321  0.4162  0.7678  3.5706 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.00000  0.0000       
 subID  (Intercept) 0.36416  0.6035       
        selfResp.Z  0.09248  0.3041   0.59
Number of obs: 3767, groups:  trait, 148; subID, 43

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.76229    0.50643  -3.480 0.000502 ***
selfResp.Z    0.31074    0.06323   4.914 8.91e-07 ***
propCorr      4.91447    0.27981  17.563  < 2e-16 ***
desirability -0.07200    0.08341  -0.863 0.388029    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slfR.Z prpCrr
selfResp.Z   0.170              
propCorr    -0.228  0.055       
desirabilty -0.941 -0.110 -0.047
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
library(broom.mixed)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
        Effect   Rsq upper.CL lower.CL
1        Model 0.094    0.112    0.077
3     propCorr 0.079    0.096    0.064
2   selfResp.Z 0.017    0.026    0.009
4 desirability 0.000    0.002    0.000
ggpredict(m, c("selfResp.Z")) %>% plot(show.title=F) + xlab("Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/SelfProjection.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does similarity-weighted self-evaluation average predict ingroup choices?

No covariates

# Logistic GLMM: similarity-weighted self-evaluation average (WSR.Z)
# predicting ingroup choice; random WSR.Z slopes by subject, random
# intercepts by trait.
m <- glmer( ingChoiceN ~ WSR.Z + ( WSR.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
m <- glmer( ingChoiceN ~ WSR.Z + ( WSR.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ WSR.Z + (WSR.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8068.6   8109.0  -4028.3   8056.6     6271 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.6462 -0.9681  0.4215  0.8628  3.6516 

Random effects:
 Groups Name        Variance  Std.Dev. Corr
 trait  (Intercept)  0.001153 0.03395      
 subID  (Intercept)  8.455504 2.90783      
        WSR.Z       10.576742 3.25219  0.41
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)
(Intercept)   0.1783     0.4747   0.376    0.707
WSR.Z         0.8192     0.5337   1.535    0.125

Correlation of Fixed Effects:
      (Intr)
WSR.Z 0.352 

Covariates

m <- glmer( ingChoiceN ~ WSR.Z + propCorr + desirability + ( WSR.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ WSR.Z + propCorr + desirability + (WSR.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7581.5   7635.5  -3782.8   7565.5     6269 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-8.6664 -0.8183  0.3757  0.7582  3.6929 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 7.667    2.769        
        WSR.Z       8.146    2.854    0.44
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.63543    0.60244  -2.715  0.00663 ** 
WSR.Z         1.05658    0.48269   2.189  0.02860 *  
propCorr      4.80291    0.22735  21.126  < 2e-16 ***
desirability -0.08427    0.06602  -1.276  0.20178    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) WSR.Z  prpCrr
WSR.Z        0.334              
propCorr    -0.143  0.036       
desirabilty -0.628 -0.095 -0.060
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
        Effect   Rsq upper.CL lower.CL
1        Model 0.127    0.142    0.113
2        WSR.Z 0.094    0.107    0.081
3     propCorr 0.041    0.051    0.032
4 desirability 0.000    0.001    0.000

Does self-evaluation weighted similarity predict ingroup choices?

No covariates

m <- glmer( ingChoiceN ~ SE.Z + ( SE.Z | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ SE.Z + (SE.Z | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8211.1   8244.9  -4100.6   8201.1     6272 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.0638 -0.9909  0.5065  0.8679  2.7771 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 subID  (Intercept) 0.36181  0.6015       
        SE.Z        0.09246  0.3041   0.49
Number of obs: 6277, groups:  subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)   
(Intercept)  0.28008    0.09565   2.928  0.00341 **
SE.Z         0.03742    0.05378   0.696  0.48655   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
     (Intr)
SE.Z 0.414 

Covariates

m <- glmer( ingChoiceN ~ SE.Z + propCorr + desirability + ( SE.Z | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ SE.Z + propCorr + desirability + (SE.Z | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7691.0   7738.2  -3838.5   7677.0     6270 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.4338 -0.8533  0.4305  0.7844  3.5288 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 subID  (Intercept) 0.41958  0.6478       
        SE.Z        0.07936  0.2817   0.57
Number of obs: 6277, groups:  subID, 43

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.73776    0.41882  -4.149 3.34e-05 ***
SE.Z          0.07698    0.05304   1.451    0.147    
propCorr      4.67962    0.21519  21.746  < 2e-16 ***
desirability -0.05576    0.06869  -0.812    0.417    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) SE.Z   prpCrr
SE.Z         0.332              
propCorr    -0.202  0.034       
desirabilty -0.936 -0.243 -0.055
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
        Effect   Rsq upper.CL lower.CL
1        Model 0.074    0.087    0.062
3     propCorr 0.073    0.086    0.062
2         SE.Z 0.001    0.003    0.000
4 desirability 0.000    0.001    0.000

Do cross-validated similarity*self-evaluation predictions predict ingroup choices?

No covariates

# Logistic GLMM: cross-validated similarity*self-evaluation predictions
# (predicted.Z) predicting ingroup choice; random predicted.Z slopes by
# subject, random intercepts by trait.
m <- glmer( ingChoiceN ~ predicted.Z + ( predicted.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
m <- glmer( ingChoiceN ~ predicted.Z + ( predicted.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z + (predicted.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7064.9   7104.4  -3526.4   7052.9     5382 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.5159 -1.0120  0.6189  0.8789  2.0509 

Random effects:
 Groups Name        Variance  Std.Dev. Corr
 trait  (Intercept) 0.0000000 0.00000      
 subID  (Intercept) 0.3092826 0.55613      
        predicted.Z 0.0004218 0.02054  1.00
Number of obs: 5388, groups:  trait, 148; subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept)  0.31062    0.09159   3.391 0.000696 ***
predicted.Z  0.27783    0.08821   3.149 0.001636 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr)
predicted.Z 0.156 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

m <- glmer( ingChoiceN ~ predicted.Z + propCorr + desirability + ( predicted.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z + propCorr + desirability + (predicted.Z |  
    subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  6642.0   6694.8  -3313.0   6626.0     5380 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.3386 -0.8900  0.4627  0.7848  4.0188 

Random effects:
 Groups Name        Variance  Std.Dev. Corr 
 trait  (Intercept) 0.0000000 0.00000       
 subID  (Intercept) 0.3575097 0.59792       
        predicted.Z 0.0002254 0.01501  -1.00
Number of obs: 5388, groups:  trait, 148; subID, 43

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -2.12443    0.41091  -5.170 2.34e-07 ***
predicted.Z   0.30607    0.09566   3.199  0.00138 ** 
propCorr      4.48818    0.22835  19.655  < 2e-16 ***
desirability  0.03223    0.06662   0.484  0.62849    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z prpCrr
predicted.Z  0.073              
propCorr    -0.246  0.034       
desirabilty -0.932 -0.074 -0.032
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
        Effect   Rsq upper.CL lower.CL
1        Model 0.085    0.099    0.071
3     propCorr 0.069    0.083    0.057
2  predicted.Z 0.018    0.025    0.011
4 desirability 0.000    0.001    0.000
ggpredict(m, c("predicted.Z")) %>% plot(show.title=F) + xlab("Cross-Validated Self-Descriptiveness Predictions") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVprediction.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Do people self-anchor more for higher indegree traits?

No covariates

m <- glmer( ingChoiceN ~ predicted.Z * inDegree + ( predicted.Z + inDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)

Covariates

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * inDegree + propCorr + desirability +  
    (predicted.Z + inDegree | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  6656.2   6741.9  -3315.1   6630.2     5375 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.5514 -0.8888  0.4592  0.7783  4.4090 

Random effects:
 Groups Name        Variance  Std.Dev. Corr       
 trait  (Intercept) 0.0000000 0.00000             
 subID  (Intercept) 0.0000000 0.00000             
        predicted.Z 0.5970943 0.77272    NaN      
        inDegree    0.0002242 0.01497    NaN -0.11
Number of obs: 5388, groups:  trait, 148; subID, 43

Fixed effects:
                      Estimate Std. Error z value Pr(>|z|)    
(Intercept)          -2.130181   0.407992  -5.221 1.78e-07 ***
predicted.Z           0.206506   0.150277   1.374    0.169    
inDegree              0.001190   0.003729   0.319    0.750    
propCorr              4.490776   0.229122  19.600  < 2e-16 ***
desirability          0.032646   0.068194   0.479    0.632    
predicted.Z:inDegree  0.005562   0.003680   1.511    0.131    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z inDegr prpCrr dsrblt
predicted.Z  0.030                            
inDegree     0.021 -0.069                     
propCorr    -0.251  0.012  0.026              
desirabilty -0.929 -0.024 -0.152 -0.036       
prdctd.Z:nD  0.010 -0.314  0.023  0.001 -0.008
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Do people self-anchor more for higher outdegree traits?

No covariates

m <- glmer( ingChoiceN ~ predicted.Z * outDegree + ( predicted.Z + outDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)

Covariates

m <- glmer( ingChoiceN ~ predicted.Z * outDegree + propCorr + desirability +  ( predicted.Z + outDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "outDegree")) %>% plot(show.title=F) + xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()

Do cross-validated similarity*self-evaluation predictions predict ingroup choices, regardless of whether it was seen prior or not?

No covariates

# Does the predicted.Z effect generalize to held-out ("novel") traits?
# Interaction of predicted.Z with novel; random predicted.Z and novel
# effects by subject (no trait intercept in this specification).
m <- glmer( ingChoiceN ~ predicted.Z * novel + ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel + (predicted.Z + novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7072.4   7138.3  -3526.2   7052.4     5378 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.5663 -1.0129  0.6074  0.8791  2.0525 

Random effects:
 Groups Name          Variance  Std.Dev. Corr     
 subID  (Intercept)   3.062e-01 0.553356          
        predicted.Z   4.235e-04 0.020579 1.00     
        novelHeld Out 9.471e-05 0.009732 1.00 1.00
Number of obs: 5388, groups:  subID, 43

Fixed effects:
                          Estimate Std. Error z value Pr(>|z|)   
(Intercept)                0.29103    0.09680   3.007  0.00264 **
predicted.Z                0.27323    0.09591   2.849  0.00439 **
novelHeld Out              0.04090    0.06143   0.666  0.50560   
predicted.Z:novelHeld Out  0.01432    0.06454   0.222  0.82443   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO
predicted.Z  0.081              
novelHeldOt -0.308  0.086       
prdctd.Z:HO  0.087 -0.376 -0.053
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

m <- glmer( ingChoiceN ~ predicted.Z * novel + propCorr + desirability + ( predicted.Z + novel | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("predicted.Z", "novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()  + scale_color_discrete(labels = c("Trained","Held-Out"))
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)

Does generalization depend on outdegree?

No covariates

m <- glmer( ingChoiceN ~ predicted.Z * novel * outDegree  + ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel * outDegree + (predicted.Z +      novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7077.5   7169.8  -3524.7   7049.5     5374 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6040 -1.0156  0.6007  0.8775  2.0801 

Random effects:
 Groups Name          Variance  Std.Dev. Corr     
 subID  (Intercept)   0.3060322 0.55320           
        predicted.Z   0.0006169 0.02484  1.00     
        novelHeld Out 0.0001158 0.01076  1.00 1.00
Number of obs: 5388, groups:  subID, 43

Fixed effects:
                                     Estimate Std. Error z value Pr(>|z|)  
(Intercept)                          0.225934   0.117025   1.931   0.0535 .
predicted.Z                          0.242491   0.116671   2.078   0.0377 *
novelHeld Out                        0.073738   0.116367   0.634   0.5263  
outDegree                            0.003083   0.003131   0.985   0.3248  
predicted.Z:novelHeld Out           -0.053561   0.116903  -0.458   0.6468  
predicted.Z:outDegree                0.001036   0.003233   0.320   0.7487  
novelHeld Out:outDegree             -0.001656   0.004670  -0.355   0.7230  
predicted.Z:novelHeld Out:outDegree  0.003237   0.004635   0.698   0.4849  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO outDgr pr.Z:HO pr.Z:D nvHO:D
predicted.Z  0.072                                           
novelHeldOt -0.444  0.054                                    
outDegree   -0.562 -0.006  0.557                             
prdctd.Z:HO  0.057 -0.500  0.023 -0.045                      
prdctd.Z:tD  0.052 -0.571 -0.048 -0.108  0.577               
nvlHldOt:tD  0.373 -0.028 -0.849 -0.669 -0.031   0.072       
prdc.Z:HO:D -0.032  0.401 -0.028  0.074 -0.835  -0.699  0.016
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

# Generalization by outdegree, now adjusting for response accuracy (propCorr)
# and trait desirability; random structure unchanged (by-subject slopes for
# predicted.Z and novel).
m <- glmer(
  ingChoiceN ~ predicted.Z * novel * outDegree + propCorr + desirability +
    (predicted.Z + novel | subID),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel * outDegree + propCorr + desirability +  
    (predicted.Z + novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  6654.1   6759.6  -3311.0   6622.1     5372 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.6625 -0.8920  0.4623  0.7856  4.0329 

Random effects:
 Groups Name          Variance  Std.Dev. Corr       
 subID  (Intercept)   0.3311901 0.57549             
        predicted.Z   0.0002513 0.01585  -1.00      
        novelHeld Out 0.0025627 0.05062   1.00 -1.00
Number of obs: 5388, groups:  subID, 43

Fixed effects:
                                      Estimate Std. Error z value Pr(>|z|)    
(Intercept)                         -2.0835900  0.4370594  -4.767 1.87e-06 ***
predicted.Z                          0.2817766  0.1218493   2.313   0.0208 *  
novelHeld Out                        0.1015024  0.1214803   0.836   0.4034    
outDegree                            0.0035754  0.0034356   1.041   0.2980    
propCorr                             4.4974214  0.2285401  19.679  < 2e-16 ***
desirability                         0.0085942  0.0746937   0.115   0.9084    
predicted.Z:novelHeld Out           -0.0721766  0.1221076  -0.591   0.5545    
predicted.Z:outDegree                0.0003677  0.0033374   0.110   0.9123    
novelHeld Out:outDegree             -0.0031030  0.0048310  -0.642   0.5207    
predicted.Z:novelHeld Out:outDegree  0.0044562  0.0048146   0.926   0.3547    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO outDgr prpCrr dsrblt pr.Z:HO pr.Z:D nvHO:D
predicted.Z  0.018                                                         
novelHeldOt -0.132  0.060                                                  
outDegree    0.167  0.009  0.516                                           
propCorr    -0.230  0.031  0.017  0.021                                    
desirabilty -0.925 -0.028  0.016 -0.342 -0.034                             
prdctd.Z:HO  0.013 -0.453  0.023 -0.054 -0.009  0.007                      
prdctd.Z:tD  0.019 -0.572 -0.052 -0.109 -0.012  0.000  0.577               
nvlHldOt:tD  0.121 -0.035 -0.848 -0.623 -0.019 -0.013 -0.028   0.077       
prdc.Z:HO:D -0.014  0.398 -0.026  0.075  0.015  0.001 -0.836  -0.695  0.013
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Fixed effects as odds ratios with Wald CIs, then semi-partial R^2 per term.
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
                                Effect   Rsq upper.CL lower.CL
1                                Model 0.085    0.100    0.072
5                             propCorr 0.069    0.083    0.057
2                          predicted.Z 0.002    0.005    0.000
4                            outDegree 0.000    0.002    0.000
10 predicted.Z:novelHeld Out:outDegree 0.000    0.001    0.000
3                        novelHeld Out 0.000    0.001    0.000
9              novelHeld Out:outDegree 0.000    0.001    0.000
7            predicted.Z:novelHeld Out 0.000    0.001    0.000
6                         desirability 0.000    0.001    0.000
8                predicted.Z:outDegree 0.000    0.001    0.000
ggpredict(m, c("predicted.Z", "outDegree" ,"novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does generalization depend on indegree?

No covariates

# Same generalization model as above, substituting inDegree for outDegree
# (no covariates).
m <- glmer(
  ingChoiceN ~ predicted.Z * novel * inDegree +
    (predicted.Z + novel | subID),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel * inDegree + (predicted.Z +      novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7077.4   7169.7  -3524.7   7049.4     5374 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.5758 -1.0135  0.5989  0.8737  2.2289 

Random effects:
 Groups Name          Variance  Std.Dev. Corr     
 subID  (Intercept)   3.095e-01 0.556356          
        predicted.Z   5.908e-04 0.024306 1.00     
        novelHeld Out 5.933e-05 0.007703 1.00 1.00
Number of obs: 5388, groups:  subID, 43

Fixed effects:
                                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)                         0.3288522  0.1251085   2.629  0.00858 **
predicted.Z                         0.1776097  0.1256027   1.414  0.15734   
novelHeld Out                      -0.0070703  0.1297527  -0.054  0.95654   
inDegree                           -0.0018145  0.0037459  -0.484  0.62811   
predicted.Z:novelHeld Out           0.0234077  0.1300056   0.180  0.85711   
predicted.Z:inDegree                0.0046982  0.0038152   1.231  0.21816   
novelHeld Out:inDegree              0.0022760  0.0053761   0.423  0.67203   
predicted.Z:novelHeld Out:inDegree -0.0004284  0.0053609  -0.080  0.93631   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO inDegr pr.Z:HO pr.Z:D nvHO:D
predicted.Z  0.067                                           
novelHeldOt -0.488  0.046                                    
inDegree    -0.628 -0.013  0.599                             
prdctd.Z:HO  0.048 -0.543  0.027 -0.032                      
prdctd.Z:nD  0.042 -0.637 -0.038 -0.075  0.621               
nvlHldOt:nD  0.433 -0.022 -0.880 -0.695 -0.032   0.053       
prdc.Z:HO:D -0.027  0.455 -0.030  0.054 -0.867  -0.713  0.020
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel * inDegree + (predicted.Z +      novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4842.6   4929.6  -2407.3   4814.6     3676 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.3708 -1.0425  0.6081  0.8478  2.1489 

Random effects:
 Groups Name          Variance Std.Dev. Corr       
 subID  (Intercept)   0.297726 0.54564             
        predicted.Z   0.014331 0.11971  -0.94      
        novelHeld Out 0.004817 0.06941   0.06  0.29
Number of obs: 3690, groups:  subID, 29

Fixed effects:
                                     Estimate Std. Error z value Pr(>|z|)  
(Intercept)                         0.3072752  0.1489679   2.063   0.0391 *
predicted.Z                         0.2147101  0.1427624   1.504   0.1326  
novelHeld Out                      -0.0106251  0.1574787  -0.067   0.9462  
inDegree                            0.0001678  0.0045112   0.037   0.9703  
predicted.Z:novelHeld Out          -0.1367979  0.1585589  -0.863   0.3883  
predicted.Z:inDegree                0.0019902  0.0043923   0.453   0.6505  
novelHeld Out:inDegree              0.0005728  0.0065070   0.088   0.9298  
predicted.Z:novelHeld Out:inDegree  0.0040134  0.0066092   0.607   0.5437  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO inDegr pr.Z:HO pr.Z:D nvHO:D
predicted.Z -0.172                                           
novelHeldOt -0.495  0.045                                    
inDegree    -0.626 -0.007  0.598                             
prdctd.Z:HO  0.043 -0.485  0.019 -0.033                      
prdctd.Z:nD  0.041 -0.652 -0.035 -0.069  0.585               
nvlHldOt:nD  0.437 -0.020 -0.878 -0.693 -0.027   0.048       
prdc.Z:HO:D -0.024  0.429 -0.025  0.046 -0.873  -0.666  0.020

Neighboring Dependencies Predicting Choices

No covariates

# Do outdegree-neighbor self-evaluations predict ingroup choices?
# By-subject random slope plus a by-trait random intercept; no covariates.
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z +
    (neighAveOutSE.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ neighAveOutSE.Z + (neighAveOutSE.Z | subID) + (1 |      trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8135.8   8176.2  -4061.9   8123.8     6221 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8785 -1.0011  0.5203  0.8853  2.4945 

Random effects:
 Groups Name            Variance Std.Dev. Corr
 trait  (Intercept)     0.0000   0.0000       
 subID  (Intercept)     0.4271   0.6535       
        neighAveOutSE.Z 0.2070   0.4550   0.24
Number of obs: 6227, groups:  trait, 147; subID, 43

Fixed effects:
                Estimate Std. Error z value Pr(>|z|)   
(Intercept)      0.29168    0.10780   2.706  0.00682 **
neighAveOutSE.Z  0.17689    0.08467   2.089  0.03669 * 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr)
nghAvOtSE.Z 0.174 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ neighAveOutSE.Z + propCorr + desirability + (neighAveOutSE.Z |  
    subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7624.0   7677.9  -3804.0   7608.0     6219 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.7522 -0.8558  0.4295  0.7816  4.1533 

Random effects:
 Groups Name            Variance Std.Dev. Corr
 trait  (Intercept)     0.0000   0.0000       
 subID  (Intercept)     0.4503   0.6710       
        neighAveOutSE.Z 0.1639   0.4048   0.31
Number of obs: 6227, groups:  trait, 147; subID, 43

Fixed effects:
                Estimate Std. Error z value Pr(>|z|)    
(Intercept)     -1.93132    0.39235  -4.922 8.55e-07 ***
neighAveOutSE.Z  0.23308    0.07962   2.927  0.00342 ** 
propCorr         4.74189    0.21941  21.612  < 2e-16 ***
desirability    -0.02581    0.06293  -0.410  0.68177    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) nAOSE. prpCrr
nghAvOtSE.Z  0.112              
propCorr    -0.241  0.038       
desirabilty -0.919 -0.065 -0.039
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Generalization of Outdegree Neighboring Self-Evaluations

No covariates

# Does the neighbor-evaluation effect generalize to held-out traits?
# (No covariates.)
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z * novel +
    (neighAveOutSE.Z + novel | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
summary(m)

Covariates

# Same generalization model, adjusting for propCorr and desirability.
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z * novel + propCorr + desirability +
    (neighAveOutSE.Z + novel | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ neighAveOutSE.Z * novel + propCorr + desirability +  
    (neighAveOutSE.Z + novel | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7632.1   7719.7  -3803.0   7606.1     6214 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.9210 -0.8534  0.4273  0.7822  4.1241 

Random effects:
 Groups Name            Variance Std.Dev. Corr     
 trait  (Intercept)     0.000000 0.00000           
 subID  (Intercept)     0.436805 0.66091           
        neighAveOutSE.Z 0.161881 0.40234  0.28     
        novelHeld Out   0.001903 0.04362  0.38 0.99
Number of obs: 6227, groups:  trait, 147; subID, 43

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                   -1.94646    0.39272  -4.956 7.18e-07 ***
neighAveOutSE.Z                0.20730    0.08225   2.520   0.0117 *  
novelHeld Out                  0.02661    0.05781   0.460   0.6453    
propCorr                       4.74567    0.21958  21.613  < 2e-16 ***
desirability                  -0.02516    0.06293  -0.400   0.6893    
neighAveOutSE.Z:novelHeld Out  0.06926    0.05848   1.184   0.2363    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) nAOSE. nvlHlO prpCrr dsrblt
nghAvOtSE.Z  0.105                            
novelHeldOt -0.051  0.071                     
propCorr    -0.241  0.032  0.003              
desirabilty -0.918 -0.065  0.003 -0.039       
ngAOSE.Z:HO -0.011 -0.263  0.024  0.020  0.008
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Odds ratios with CIs, then semi-partial R^2 per term.
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
                         Effect   Rsq upper.CL lower.CL
1                         Model 0.080    0.094    0.068
4                      propCorr 0.072    0.085    0.061
2               neighAveOutSE.Z 0.004    0.008    0.002
6 neighAveOutSE.Z:novelHeld Out 0.000    0.002    0.000
3                 novelHeld Out 0.000    0.001    0.000
5                  desirability 0.000    0.001    0.000
ggpredict(m, c("neighAveOutSE.Z","novel")) %>% plot(show.title=F)+ xlab("Outwards Neighboring Self-Evaluations") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
Data were 'prettified'. Consider using `terms="neighAveOutSE.Z [all]"` to get smooth plots.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/outdegreeNeighborsGeneralization.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does entropy (i.e., uncertainty) predict likelihood of ingroup choices?

No covariates

# Does choice-distribution entropy (uncertainty) predict ingroup choices?
# (No covariates.)
m <- glmer(
  ingChoiceN ~ entropy.Z +
    (entropy.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ entropy.Z + (entropy.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8206.5   8246.9  -4097.2   8194.5     6271 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8857 -0.9898  0.4813  0.8925  2.6246 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.000    0.000         
 subID  (Intercept) 2.491    1.578         
        entropy.Z   2.527    1.590    -0.50
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)  
(Intercept)   0.4721     0.2672   1.767   0.0772 .
entropy.Z    -0.4455     0.2776  -1.605   0.1085  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
          (Intr)
entropy.Z -0.404
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

# Entropy model adjusting for propCorr and desirability.
m <- glmer(
  ingChoiceN ~ entropy.Z + propCorr + desirability +
    (entropy.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ entropy.Z + propCorr + desirability + (entropy.Z |  
    subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7668.0   7721.9  -3826.0   7652.0     6269 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.8533 -0.8442  0.4042  0.7714  5.0292 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.000    0.000         
 subID  (Intercept) 3.376    1.837         
        entropy.Z   3.216    1.793    -0.64
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.80823    0.49355  -3.664 0.000249 ***
entropy.Z    -0.39190    0.30816  -1.272 0.203465    
propCorr      4.91308    0.22429  21.905  < 2e-16 ***
desirability -0.03031    0.06432  -0.471 0.637446    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z prpCrr
entropy.Z   -0.351              
propCorr    -0.195  0.004       
desirabilty -0.751  0.021 -0.042
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Odds ratios with CIs, then semi-partial R^2 per term.
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
        Effect   Rsq upper.CL lower.CL
1        Model 0.072    0.084    0.060
3     propCorr 0.055    0.066    0.044
2    entropy.Z 0.018    0.025    0.012
4 desirability 0.000    0.001    0.000
ggpredict(m, c("entropy.Z")) %>% plot(show.title=F) + xlab("Uncertainty") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/Uncertainty.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does a linear trend of similarity-based probabilities predict ingroup choices?

No covariates

# Does the linear trend of similarity-based probabilities (slope) predict
# ingroup choices? (No covariates.)
m <- glmer(
  ingChoiceN ~ slope.Z +
    (slope.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ slope.Z + (slope.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8108.2   8148.6  -4048.1   8096.2     6271 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.5074 -0.9707  0.4360  0.8749  3.6255 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 5.717    2.391        
        slope.Z     7.279    2.698    0.19
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)   
(Intercept)   0.1771     0.4026    0.44  0.66008   
slope.Z       1.2160     0.4571    2.66  0.00781 **
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
        (Intr)
slope.Z 0.166 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Covariates

# Slope model adjusting for propCorr and desirability.
m <- glmer(
  ingChoiceN ~ slope.Z + propCorr + desirability +
    (slope.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ slope.Z + propCorr + desirability + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7621.2   7675.2  -3802.6   7605.2     6269 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-8.1650 -0.8309  0.3847  0.7669  3.6155 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 3.197e-19 5.654e-10     
 subID  (Intercept) 3.256e+00 1.805e+00     
        slope.Z     4.631e+00 2.152e+00 0.18
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.87029    0.49677  -3.765 0.000167 ***
slope.Z       1.22116    0.38451   3.176 0.001494 ** 
propCorr      4.74542    0.22383  21.201  < 2e-16 ***
desirability -0.06466    0.06497  -0.995 0.319579    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z prpCrr
slope.Z      0.147              
propCorr    -0.191  0.026       
desirabilty -0.735 -0.077 -0.051
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Odds ratios with CIs, then marginal predictions across slope.Z.
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
ggpredict(m, c("slope.Z")) %>% plot(show.title=F) + xlab("Linear Trend of Greater Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/Slope.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

# Slope effect controlling for self-descriptiveness (selfResp.Z).
# Note: scale(slope) is kept inline (rather than slope.Z) so coefficient
# labels match the printed output below.
m <- glmer(
  ingChoiceN ~ scale(slope) + selfResp.Z +
    (scale(slope) + selfResp.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ scale(slope) + selfResp.Z + (scale(slope) + selfResp.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  3324.8   3383.3  -1652.4   3304.8     2546 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.0960 -1.0011  0.5507  0.8387  2.3694 

Random effects:
 Groups Name         Variance Std.Dev. Corr       
 trait  (Intercept)  0.00000  0.0000              
 subID  (Intercept)  0.38198  0.6180              
        scale(slope) 0.04406  0.2099   -0.99      
        selfResp.Z   0.13234  0.3638    0.55 -0.66
Number of obs: 2556, groups:  trait, 148; subID, 29

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)  
(Intercept)   0.27038    0.12721   2.126   0.0335 *
scale(slope)  0.21730    0.11988   1.813   0.0699 .
selfResp.Z    0.19319    0.08783   2.200   0.0278 *
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(s)
scale(slop) -0.406       
selfResp.Z   0.403 -0.454
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Does a non-parametric trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

# Non-parametric trend (nlslope) controlling for self-descriptiveness.
m <- glmer(
  ingChoiceN ~ scale(nlslope) + selfResp.Z +
    (scale(nlslope) + selfResp.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ scale(nlslope) + selfResp.Z + (scale(nlslope) +      selfResp.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  3327.3   3385.7  -1653.6   3307.3     2546 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.0256 -0.9895  0.5363  0.8481  2.3016 

Random effects:
 Groups Name           Variance Std.Dev. Corr       
 trait  (Intercept)    0.0000   0.0000              
 subID  (Intercept)    0.4056   0.6369              
        scale(nlslope) 0.2145   0.4632   -0.24      
        selfResp.Z     0.1390   0.3728    0.62 -0.44
Number of obs: 2556, groups:  trait, 148; subID, 29

Fixed effects:
               Estimate Std. Error z value Pr(>|z|)  
(Intercept)     0.35811    0.14766   2.425   0.0153 *
scale(nlslope)  0.17096    0.14329   1.193   0.2328  
selfResp.Z      0.20564    0.08781   2.342   0.0192 *
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(n)
scal(nlslp) -0.073       
selfResp.Z   0.393 -0.389
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for cross-validated self-descriptiveness (predicted.Z)?

# Slope effect controlling for cross-validated predicted self-descriptiveness.
m <- glmer(
  ingChoiceN ~ slope.Z + predicted.Z +
    (slope.Z + predicted.Z | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ slope.Z + predicted.Z + (slope.Z + predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  6817.3   6883.2  -3398.6   6797.3     5378 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-9.6167 -0.9885  0.4410  0.8385  2.5346 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr       
 trait  (Intercept) 7.538e-15 8.682e-08            
 subID  (Intercept) 5.908e-01 7.686e-01            
        slope.Z     1.744e+01 4.176e+00 -0.63      
        predicted.Z 1.611e+01 4.013e+00  0.63 -1.00
Number of obs: 5388, groups:  trait, 148; subID, 43

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)  
(Intercept)   0.1207     0.1269   0.951   0.3416  
slope.Z       1.6286     0.7111   2.290   0.0220 *
predicted.Z  -1.3225     0.6924  -1.910   0.0561 .
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z
slope.Z     -0.632       
predicted.Z  0.638 -0.991
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Backwards solution: Can you predict self-evaluations from similarity to ingroup and outgroup choices?

m <- lmer( scale(selfResp) ~ scale(inGsim) + scale(outGsim) + (  scale(inGsim) + scale(outGsim) | subID) + (1 | trait), data = fullTrain)
Model failed to converge with max|grad| = 0.00239224 (tol = 0.002, component 1)
summary(m)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: scale(selfResp) ~ scale(inGsim) + scale(outGsim) + (scale(inGsim) +  
    scale(outGsim) | subID) + (1 | trait)
   Data: fullTrain

REML criterion at convergence: 9640.5

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.1528 -0.6037  0.0680  0.6497  2.9224 

Random effects:
 Groups   Name           Variance Std.Dev. Corr       
 trait    (Intercept)    0.09989  0.3160              
 subID    (Intercept)    0.20570  0.4535              
          scale(inGsim)  0.02907  0.1705   -0.26      
          scale(outGsim) 0.02580  0.1606    0.22 -0.97
 Residual                0.65418  0.8088              
Number of obs: 3811, groups:  trait, 148; subID, 43

Fixed effects:
               Estimate Std. Error       df t value Pr(>|t|)    
(Intercept)    -0.01146    0.07520 53.75711  -0.152 0.879491    
scale(inGsim)   0.15216    0.03659 54.07607   4.158 0.000115 ***
scale(outGsim) -0.02811    0.03435 41.30898  -0.818 0.417848    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(nG)
scale(nGsm) -0.175        
scale(tGsm)  0.143 -0.558 
optimizer (nloptwrap) convergence code: 0 (OK)
Model failed to converge with max|grad| = 0.00239224 (tol = 0.002, component 1)
# Fixed effects with CIs, then marginal predictions across ingroup similarity.
tidy(m,conf.int=TRUE,effects="fixed")
ggpredict(m, c("inGsim")) %>% plot(show.title=F) + xlab("Similarity to Ingroup Choices") + ylab("Self-Evaluation") + jtools::theme_apa()

# Does the slope effect generalize to held-out traits?
m <- glmer(
  ingChoiceN ~ scale(slope) * novel +
    (scale(slope) + novel | subID) + (1 | trait),
  family = binomial,
  data = fullTest,
  nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ scale(slope) * novel + (scale(slope) + novel | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8116.8   8191.0  -4047.4   8094.8     6266 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.6460 -0.9719  0.4342  0.8748  3.6722 

Random effects:
 Groups Name          Variance  Std.Dev.  Corr       
 trait  (Intercept)   2.106e-15 4.589e-08            
 subID  (Intercept)   5.839e+00 2.417e+00            
        scale(slope)  7.265e+00 2.695e+00  0.18      
        novelHeld Out 4.776e-03 6.911e-02 -0.74  0.53
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
                           Estimate Std. Error z value Pr(>|z|)   
(Intercept)                0.181581   0.406997   0.446  0.65549   
scale(slope)               1.211092   0.457627   2.646  0.00813 **
novelHeld Out              0.020056   0.056590   0.354  0.72304   
scale(slope):novelHeld Out 0.001002   0.063191   0.016  0.98735   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(s) nvlHlO
scale(slop)  0.157              
novelHeldOt -0.179  0.082       
scl(slp):HO  0.004 -0.058  0.030
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Odds ratios (exponentiated coefficients) with CIs for the fit above
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
summary(m)  # NOTE(review): the output that follows is for a different model (eSE) whose fitting call is not shown in this excerpt
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ eSE + (eSE | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4785.1   4816.1  -2387.5   4775.1     3635 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6814 -1.0139  0.5896  0.8615  2.8232 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 subID  (Intercept) 24.66    4.965         
        eSE         16.76    4.094    -1.00
Number of obs: 3640, groups:  subID, 25

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)
(Intercept)  -0.4974     1.1746  -0.424    0.672
eSE           0.6004     0.9522   0.631    0.528

Correlation of Fixed Effects:
    (Intr)
eSE -0.996
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ sSE * novel + (sSE | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.8   4841.4  -2387.9   4775.8     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6486 -1.0125  0.5929  0.8627  2.7918 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept)  0.000   0.000         
 subID  (Intercept)  1.134   1.065         
        sSE         26.117   5.111    -0.90
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)
(Intercept)  0.03974    0.29415   0.135    0.893
sSE          0.89300    1.28620   0.694    0.487
novel1       0.12039    0.32510   0.370    0.711
sSE:novel1  -0.36572    1.26144  -0.290    0.772

Correlation of Fixed Effects:
           (Intr) sSE    novel1
sSE        -0.928              
novel1     -0.427  0.366       
sSE:novel1  0.413 -0.372 -0.976
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Ingroup choice ~ standardized self-evaluation (SE.Z) x novelty;
# random SE.Z+novelty effects by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  ingChoiceN ~ SE.Z * novel + (SE.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
summary(m)  # coefficient table and variance components
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")  # odds ratios + CIs
r2beta(m)   # semi-partial R^2 for each fixed effect
summary(m)  # repeated in the original session; retained for transcript fidelity
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(oSE) + (scale(oSE) | subID) + (1 |      trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  5482.7   5520.8  -2735.3   5470.7     4215 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6879 -0.9939  0.5179  0.8500  2.8947 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.0000   0.0000       
 subID  (Intercept) 0.3871   0.6222       
        scale(oSE)  0.1068   0.3267   0.59
Number of obs: 4221, groups:  trait, 148; subID, 29

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)  
(Intercept)  0.30322    0.12022   2.522   0.0117 *
scale(oSE)   0.08435    0.06924   1.218   0.2232  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
           (Intr)
scale(oSE) 0.504 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Ingroup choice ~ model-predicted evaluation x need-to-belong (NTB);
# random predicted.Z slopes by subject and by trait
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(NTB) + ( predicted.Z | subID) + ( predicted.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
# NOTE(review): identical refit of the model immediately above (redundant in a
# script; harmless in an interactive session — results are unchanged)
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(NTB) + ( predicted.Z | subID) + ( predicted.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(NTB) + (predicted.Z |  
    subID) + (predicted.Z | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7069.5   7135.5  -3524.8   7049.5     5378 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.5031 -1.0174  0.6124  0.8754  2.0570 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 4.970e-14 2.229e-07     
        predicted.Z 2.168e-14 1.472e-07 1.00
 subID  (Intercept) 2.908e-01 5.393e-01     
        predicted.Z 5.770e-04 2.402e-02 1.00
Number of obs: 5388, groups:  trait, 148; subID, 43

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)    
(Intercept)             0.32033    0.09038   3.544 0.000394 ***
predicted.Z             0.29293    0.09455   3.098 0.001947 ** 
scale(NTB)              0.17693    0.09817   1.802 0.071494 .  
predicted.Z:scale(NTB)  0.03974    0.08321   0.478 0.632997    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(NTB)
predicted.Z  0.129              
scale(NTB)   0.096  0.062       
prd.Z:(NTB)  0.136 -0.352  0.421
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Ingroup choice ~ entropy x Rosenberg self-esteem (RSE);
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(RSE) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(RSE) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4793.6   4843.2  -2388.8   4777.6     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8434 -1.0083  0.5747  0.8744  2.3956 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 1.004e-18 1.002e-09      
 subID  (Intercept) 9.301e-01 9.644e-01      
        entropy.Z   1.594e+00 1.262e+00 -0.55
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)            0.7127     0.2436   2.925  0.00344 **
entropy.Z             -0.5494     0.3071  -1.789  0.07362 . 
scale(RSE)            -0.2484     0.2335  -1.064  0.28751   
entropy.Z:scale(RSE)   0.3395     0.3118   1.089  0.27626   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(RSE)
entropy.Z   -0.412              
scale(RSE)  -0.019  0.206       
ent.Z:(RSE)  0.218 -0.047 -0.388
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "RSE")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x self-concept clarity (SCC);
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(SCC) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SCC) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4795.1   4844.7  -2389.6   4779.1     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8462 -1.0069  0.5800  0.8735  2.3915 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 6.577e-20 2.564e-10      
 subID  (Intercept) 9.521e-01 9.757e-01      
        entropy.Z   1.680e+00 1.296e+00 -0.56
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                      Estimate Std. Error z value Pr(>|z|)   
(Intercept)           0.690663   0.243870   2.832  0.00462 **
entropy.Z            -0.527405   0.314479  -1.677  0.09353 . 
scale(SCC)           -0.097378   0.240594  -0.405  0.68567   
entropy.Z:scale(SCC)  0.005057   0.299650   0.017  0.98653   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(SCC)
entropy.Z   -0.436              
scale(SCC)  -0.071  0.122       
ent.Z:(SCC)  0.118  0.025 -0.481
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "SCC")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x DS moderator;
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(DS) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(DS) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4793.8   4843.4  -2388.9   4777.8     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8406 -1.0079  0.5813  0.8733  2.4261 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.000         
 subID  (Intercept) 0.8798   0.938         
        entropy.Z   1.6204   1.273    -0.55
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                    Estimate Std. Error z value Pr(>|z|)   
(Intercept)           0.7324     0.2379   3.078  0.00208 **
entropy.Z            -0.5417     0.3101  -1.747  0.08070 . 
scale(DS)             0.2496     0.2290   1.090  0.27583   
entropy.Z:scale(DS)  -0.3141     0.3095  -1.015  0.31015   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z sc(DS)
entropy.Z   -0.431              
scale(DS)    0.144 -0.206       
entr.Z:(DS) -0.214  0.020 -0.448
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "DS")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x need-for-cognition (NFC);
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(NFC) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(NFC) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.1   4840.7  -2387.6   4775.1     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8458 -1.0084  0.5964  0.8739  2.4604 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.7927   0.8903        
        entropy.Z   1.2373   1.1124   -0.55
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)            0.7115     0.2278   3.123  0.00179 **
entropy.Z             -0.5087     0.2797  -1.819  0.06897 . 
scale(NFC)             0.2597     0.2271   1.144  0.25278   
entropy.Z:scale(NFC)  -0.5659     0.2716  -2.083  0.03722 * 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(NFC)
entropy.Z   -0.442              
scale(NFC)   0.112 -0.175       
ent.Z:(NFC) -0.190  0.048 -0.483
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "NFC")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x SING.Ind moderator;
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(SING.Ind) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SING.Ind) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.7   4841.3  -2387.9   4775.7     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8319 -1.0092  0.5749  0.8759  2.4004 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.6746   0.8214        
        entropy.Z   1.4260   1.1941   -0.65
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                          Estimate Std. Error z value Pr(>|z|)   
(Intercept)                 0.6476     0.2177   2.974  0.00294 **
entropy.Z                  -0.6070     0.2932  -2.071  0.03840 * 
scale(SING.Ind)            -0.5182     0.2563  -2.022  0.04318 * 
entropy.Z:scale(SING.Ind)   0.1566     0.2875   0.545  0.58605   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(SING
entropy.Z   -0.467              
scl(SING.I) -0.005  0.244       
e.Z:(SING.I  0.228 -0.066 -0.327
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "SING.Ind")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x SING.Inter moderator;
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(SING.Inter) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SING.Inter) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.4   4841.0  -2387.7   4775.4     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8785 -1.0138  0.5936  0.8820  2.4011 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.6094   0.7806        
        entropy.Z   1.7311   1.3157   -0.51
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                            Estimate Std. Error z value Pr(>|z|)    
(Intercept)                   0.7522     0.2142   3.512 0.000444 ***
entropy.Z                    -0.4557     0.3140  -1.451 0.146682    
scale(SING.Inter)             0.5286     0.2593   2.038 0.041513 *  
entropy.Z:scale(SING.Inter)  -0.1277     0.2946  -0.433 0.664789    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(SING
entropy.Z   -0.382              
scl(SING.I)  0.237 -0.005       
e.Z:(SING.I -0.056  0.040 -0.301
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "SING.Inter")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x prototypicality (Proto);
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(Proto) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(Proto) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4795.2   4844.8  -2389.6   4779.2     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8447 -1.0052  0.5806  0.8732  2.3944 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.9035   0.9505        
        entropy.Z   1.6478   1.2837   -0.54
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)   
(Intercept)             0.69010    0.23886   2.889  0.00386 **
entropy.Z              -0.50042    0.31002  -1.614  0.10650   
scale(Proto)            0.08185    0.23908   0.342  0.73209   
entropy.Z:scale(Proto) -0.09448    0.29866  -0.316  0.75173   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z scl(P)
entropy.Z   -0.432              
scale(Prot)  0.057  0.041       
entrp.Z:(P)  0.011 -0.017 -0.281
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "Proto")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x social identification (SI);
# random entropy slopes by subject, random intercept by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(SI) +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SI) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4790.2   4839.8  -2387.1   4774.2     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8302 -1.0044  0.5814  0.8769  2.4447 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.6831   0.8265        
        entropy.Z   1.8112   1.3458   -0.64
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                    Estimate Std. Error z value Pr(>|z|)    
(Intercept)          0.74323    0.21772   3.414 0.000641 ***
entropy.Z           -0.47302    0.31535  -1.500 0.133618    
scale(SI)            0.45524    0.21902   2.079 0.037661 *  
entropy.Z:scale(SI) -0.01954    0.30810  -0.063 0.949443    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z sc(SI)
entropy.Z   -0.487              
scale(SI)    0.165 -0.075       
entr.Z:(SI) -0.024  0.071 -0.503
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "SI")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ entropy x need-to-belong (NTB);
# random entropy slopes by subject AND by trait (unlike the other entropy
# models, which use a trait random intercept only).
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ entropy.Z * scale(NTB) +
    (entropy.Z | subID) + (entropy.Z | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(NTB) + (entropy.Z |      subID) + (entropy.Z | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4798.1   4860.1  -2389.0   4778.1     3630 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8355 -1.0062  0.5801  0.8745  2.3634 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 0.000e+00 0.000e+00      
        entropy.Z   5.571e-16 2.360e-08  NaN 
 subID  (Intercept) 8.986e-01 9.479e-01      
        entropy.Z   1.595e+00 1.263e+00 -0.57
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)           0.68431    0.23774   2.878   0.0040 **
entropy.Z            -0.52197    0.30393  -1.717   0.0859 . 
scale(NTB)            0.24331    0.23185   1.049   0.2940   
entropy.Z:scale(NTB) -0.06185    0.29468  -0.210   0.8337   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(NTB)
entropy.Z   -0.452              
scale(NTB)   0.040 -0.082       
ent.Z:(NTB) -0.066  0.031 -0.534
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("entropy.Z", "NTB")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# Ingroup choice ~ standardized learning slope x RSE;
# random slope.Z slopes by subject, random intercept by trait
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(RSE) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
# NOTE(review): identical refit of the model immediately above (redundant in a
# script; harmless in an interactive session — results are unchanged)
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(RSE) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(RSE) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  8108.2   8162.2  -4046.1   8092.2     6269 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.5446 -0.9753  0.4334  0.8775  3.6256 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 1.047e-14 1.023e-07     
 subID  (Intercept) 5.372e+00 2.318e+00     
        slope.Z     7.133e+00 2.671e+00 0.18
Number of obs: 6277, groups:  trait, 148; subID, 43

Fixed effects:
                   Estimate Std. Error z value Pr(>|z|)   
(Intercept)          0.2029     0.3932   0.516  0.60584   
slope.Z              1.2754     0.4536   2.812  0.00492 **
scale(RSE)          -0.7906     0.4133  -1.913  0.05575 . 
slope.Z:scale(RSE)  -0.3153     0.4452  -0.708  0.47887   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(RSE)
slope.Z      0.156              
scale(RSE)  -0.030 -0.098       
slp.Z:(RSE) -0.090  0.015  0.139
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Ingroup choice ~ trait desirability x RSE;
# random desirability slopes by subject and by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ scale(desirability) * scale(RSE) +
    (scale(desirability) | subID) + (scale(desirability) | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(RSE) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2111.4   2165.2  -1045.7   2091.4     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8959 -1.0144  0.4983  0.9101  1.4069 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003365 0.05801      
        scale(desirability) 0.013385 0.11570  1.00
 subID  (Intercept)         0.415188 0.64435      
        scale(desirability) 0.001739 0.04170  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                               Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.38520    0.20272   1.900  0.05741 .  
scale(desirability)             0.20932    0.06083   3.441  0.00058 ***
scale(RSE)                      0.27336    0.20314   1.346  0.17840    
scale(desirability):scale(RSE)  0.02205    0.05729   0.385  0.70036    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(RSE)
scl(dsrblt) 0.218               
scale(RSE)  0.019  0.019        
scl():(RSE) 0.018  0.128  0.233 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "RSE")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# Ingroup choice ~ trait desirability x self-concept clarity (SCC);
# random desirability slopes by subject and by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ scale(desirability) * scale(SCC) +
    (scale(desirability) | subID) + (scale(desirability) | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SCC) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.0   2163.8  -1045.0   2090.0     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8913 -1.0078  0.4896  0.9083  1.3630 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003354 0.05791      
        scale(desirability) 0.013479 0.11610  1.00
 subID  (Intercept)         0.377675 0.61455      
        scale(desirability) 0.003107 0.05574  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.385673   0.194130   1.987 0.046958 *  
scale(desirability)             0.209174   0.061907   3.379 0.000728 ***
scale(SCC)                      0.337654   0.195663   1.726 0.084403 .  
scale(desirability):scale(SCC) -0.006718   0.060638  -0.111 0.911783    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(SCC)
scl(dsrblt) 0.280               
scale(SCC)  0.026  0.024        
scl():(SCC) 0.022  0.143  0.293 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SCC")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# Ingroup choice ~ trait desirability x DS moderator;
# random desirability slopes by subject and by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ scale(desirability) * scale(DS) +
    (scale(desirability) | subID) + (scale(desirability) | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(DS) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.9   2164.7  -1045.4   2090.9     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.7639 -1.0088  0.4900  0.9098  1.4151 

Random effects:
 Groups Name                Variance  Std.Dev. Corr
 trait  (Intercept)         0.0034089 0.05839      
        scale(desirability) 0.0134907 0.11615  1.00
 subID  (Intercept)         0.3893895 0.62401      
        scale(desirability) 0.0009413 0.03068  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                    0.38260    0.19680   1.944 0.051891 .  
scale(desirability)            0.20787    0.06023   3.451 0.000558 ***
scale(DS)                     -0.29740    0.19610  -1.517 0.129382    
scale(desirability):scale(DS) -0.03279    0.05445  -0.602 0.547033    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) sc(DS)
scl(dsrblt)  0.167              
scale(DS)   -0.015 -0.017       
scl(d):(DS) -0.015 -0.103  0.179
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "DS")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# Ingroup choice ~ trait desirability x need-for-cognition (NFC);
# random desirability slopes by subject and by trait.
# Identical model to the original call (maxfun 1e5 == 100000).
m <- glmer(
  as.factor(ingChoiceN) ~ scale(desirability) * scale(NFC) +
    (scale(desirability) | subID) + (scale(desirability) | trait),
  data = fullTest, family = binomial, nAGQ = 1,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e5))
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(NFC) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2112.6   2166.4  -1046.3   2092.6     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8577 -1.0118  0.4889  0.9094  1.3512 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003355 0.05792      
        scale(desirability) 0.013450 0.11598  1.00
 subID  (Intercept)         0.479426 0.69241      
        scale(desirability) 0.002268 0.04762  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                               Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.38517    0.21664   1.778 0.075415 .  
scale(desirability)             0.20871    0.06126   3.407 0.000657 ***
scale(NFC)                     -0.09903    0.21536  -0.460 0.645636    
scale(desirability):scale(NFC)  0.01942    0.05410   0.359 0.719619    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(NFC)
scl(dsrblt)  0.244              
scale(NFC)  -0.008 -0.007       
scl():(NFC) -0.008 -0.060  0.264
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "NFC")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SING.Ind) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SING.Ind) +  
    (scale(desirability) | subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2112.7   2166.5  -1046.4   2092.7     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8602 -1.0147  0.4910  0.9059  1.3673 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003337 0.05777      
        scale(desirability) 0.013431 0.11589  1.00
 subID  (Intercept)         0.486231 0.69730      
        scale(desirability) 0.002218 0.04710  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                    Estimate Std. Error z value Pr(>|z|)    
(Intercept)                          0.38508    0.21806   1.766 0.077406 .  
scale(desirability)                  0.20921    0.06118   3.419 0.000628 ***
scale(SING.Ind)                      0.05346    0.21663   0.247 0.805083    
scale(desirability):scale(SING.Ind) -0.02489    0.05367  -0.464 0.642820    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(SING
scl(dsrblt) 0.242               
scl(SING.I) 0.004  0.003        
s():(SING.I 0.003  0.022  0.263 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SING.Ind")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SING.Inter) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SING.Inter) +  
    (scale(desirability) | subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.4   2164.2  -1045.2   2090.4     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8256 -1.0104  0.4934  0.9125  1.4150 

Random effects:
 Groups Name                Variance  Std.Dev. Corr
 trait  (Intercept)         0.0000000 0.00000      
        scale(desirability) 0.0093185 0.09653   NaN
 subID  (Intercept)         0.3727206 0.61051      
        scale(desirability) 0.0009385 0.03063  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                      Estimate Std. Error z value Pr(>|z|)    
(Intercept)                            0.38345    0.19280   1.989 0.046716 *  
scale(desirability)                    0.20642    0.05956   3.466 0.000529 ***
scale(SING.Inter)                     -0.33236    0.19353  -1.717 0.085910 .  
scale(desirability):scale(SING.Inter) -0.03516    0.05701  -0.617 0.537466    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(SING
scl(dsrblt)  0.162              
scl(SING.I) -0.022 -0.021       
s():(SING.I -0.020 -0.138  0.177
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SING.Inter")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(Proto) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(Proto) +  
    (scale(desirability) | subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2109.2   2163.0  -1044.6   2089.2     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.3834 -1.0152  0.5623  0.9062  1.4029 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003279 0.05726      
        scale(desirability) 0.013172 0.11477  1.00
 subID  (Intercept)         0.374479 0.61195      
        scale(desirability) 0.001273 0.03567  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                 Estimate Std. Error z value Pr(>|z|)    
(Intercept)                       0.39167    0.19357   2.023 0.043030 *  
scale(desirability)               0.21533    0.06089   3.536 0.000406 ***
scale(Proto)                     -0.38362    0.20188  -1.900 0.057402 .  
scale(desirability):scale(Proto) -0.07237    0.07165  -1.010 0.312464    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) scl(P)
scl(dsrblt)  0.194              
scale(Prot) -0.043 -0.047       
scl(ds):(P) -0.038 -0.225  0.229
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "Proto")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SI) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SI) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.7   2164.5  -1045.4   2090.7     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0641 -1.0169  0.5313  0.9074  1.5565 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003313 0.05756      
        scale(desirability) 0.013506 0.11621  1.00
 subID  (Intercept)         0.492358 0.70168      
        scale(desirability) 0.002593 0.05092  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                    0.38629    0.21938   1.761 0.078272 .  
scale(desirability)            0.21359    0.06156   3.470 0.000521 ***
scale(SI)                     -0.05538    0.21816  -0.254 0.799632    
scale(desirability):scale(SI) -0.08211    0.05464  -1.503 0.132865    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) sc(SI)
scl(dsrblt)  0.260              
scale(SI)   -0.008 -0.009       
scl(d):(SI) -0.009 -0.079  0.280
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SI")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(NTB) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(NTB) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2112.8   2166.6  -1046.4   2092.8     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.9039 -1.0131  0.5005  0.9074  1.4211 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.00337  0.05805      
        scale(desirability) 0.01338  0.11569  1.00
 subID  (Intercept)         0.47789  0.69130      
        scale(desirability) 0.00211  0.04593  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                               Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.38539    0.21633   1.782  0.07483 .  
scale(desirability)             0.20947    0.06112   3.427  0.00061 ***
scale(NTB)                     -0.11353    0.21542  -0.527  0.59819    
scale(desirability):scale(NTB) -0.01384    0.05480  -0.253  0.80060    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(NTB)
scl(dsrblt)  0.237              
scale(NTB)  -0.010 -0.010       
scl():(NTB) -0.009 -0.075  0.255
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "NTB")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * novel * scale(NFC) + (predicted.Z +  
    novel | subID) + (SE.Z * as.factor(novel) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  7096.7   7254.9  -3524.3   7048.7     5364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.5776 -1.0099  0.6120  0.8739  2.0613 

Random effects:
 Groups Name                          Variance  Std.Dev.  Corr             
 trait  (Intercept)                   2.406e-07 0.0004905                  
        SE.Z                          1.919e-04 0.0138525 -0.80            
        as.factor(novel)Held Out      3.252e-05 0.0057030 -0.82  1.00      
        SE.Z:as.factor(novel)Held Out 2.260e-02 0.1503186 -0.79  1.00  1.00
 subID  (Intercept)                   3.192e-01 0.5649467                  
        predicted.Z                   5.547e-03 0.0744757 0.55             
        novelHeld Out                 9.336e-04 0.0305543 0.17  0.92       
Number of obs: 5388, groups:  trait, 148; subID, 43

Fixed effects:
                                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)                           0.30788    0.10120   3.042  0.00235 **
predicted.Z                           0.23271    0.11743   1.982  0.04750 * 
novelHeld Out                         0.03714    0.06458   0.575  0.56526   
scale(NFC)                           -0.03902    0.10602  -0.368  0.71287   
predicted.Z:novelHeld Out             0.01637    0.07181   0.228  0.81966   
predicted.Z:scale(NFC)               -0.10215    0.10282  -0.993  0.32050   
novelHeld Out:scale(NFC)              0.04980    0.06121   0.814  0.41593   
predicted.Z:novelHeld Out:scale(NFC)  0.03690    0.06466   0.571  0.56821   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO s(NFC) pr.Z:HO p.Z:(N nHO:(N
predicted.Z  0.012                                           
novelHeldOt -0.311  0.130                                    
scale(NFC)  -0.060 -0.142 -0.050                             
prdctd.Z:HO  0.117 -0.378 -0.148  0.081                      
prd.Z:(NFC) -0.147  0.214  0.025  0.351 -0.010               
nvlHO:(NFC) -0.027  0.023  0.028 -0.244 -0.125   0.013       
p.Z:HO:(NFC  0.059 -0.052 -0.185 -0.013  0.309  -0.370  0.117
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Moderation batch: does each individual-difference scale (SCC, DS, NFC,
# SING.Ind, SING.Inter, Proto, SI, NTB) moderate the SE.Z x novelty effect on
# ingroup choice? Same random-effects structure throughout.
# NOTE(review): every model uses the column `SE.Z`, but the ggpredict() calls
# request the term "SE" -- confirm ggeffects resolves this name; otherwise
# these plot calls fail with an unknown-term error.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SCC) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE","novel","SCC")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(DS) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE","novel","DS")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(NFC) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE","novel", "NFC")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SING.Ind) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE","novel","SING.Ind")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SING.Inter) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE","novel","SING.Inter")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(Proto) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# NOTE(review): unlike the sibling calls, "novel" is omitted from the terms
# here -- confirm this is intentional and not a copy-paste slip.
ggpredict(m, c("SE", "Proto")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SI) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE","novel","SI")) %>% plot()
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(NTB) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("SE", "novel", "NTB")) %>% plot()
---
title: "R Notebook"
output: html_notebook
---

```{r}
library(lmerTest)    # lmer() with p-values; also attaches lme4 (glmer)
library(ggeffects)   # ggpredict() marginal-effect plots
library(dplyr)       # data-wrangling verbs (rename, select, mutate, ...)
library(report)      # automated model reporting
library(r2glmm)      # r2beta() effect sizes for mixed models
library(performance) # model diagnostics (masks sjstats::icc / sjstats::r2)
library(corrr)       # correlate()/focus() correlation data frames
library(broom.mixed) # tidy() methods for mixed models
```

```{r}
# Test-phase and training-phase trial data from the cleaning pipeline.
fullTest <- read.csv("../Cleaning/output/fullTest.csv")
fullTrain <- read.csv("../Cleaning/output/fullTrain.csv")

# Per-trait choice frequencies; `optionChoiceN` is renamed to `props`.
traitsFreqs <- read.csv("../Cleaning/output/traitFreqOverUnder.csv")
traitsFreqs <- rename(traitsFreqs, props = optionChoiceN)
# NOTE: merge() re-sorts fullTest by `trait`; downstream code must not
# depend on the original row order.
fullTest <- merge(fullTest, traitsFreqs[c("trait","props")], by = "trait")
# Flip the proportion for underestimators -- presumably so propCorr reflects
# the subject-type-appropriate choice probability (TODO confirm intent).
fullTest$propCorr <- ifelse(fullTest$Estimator=="Underestimator", 1-fullTest$props, fullTest$props)

uSubs <- unique(fullTest$subID)

# One row per subject, for subject-level (individual-difference) analyses.
indDiffs <- fullTest[!duplicated(fullTest$subID),]
```

```{r}
# Convert the choice and novelty codes to factors, then z-score every
# continuous predictor into a matching ".Z" column so model coefficients
# are on comparable scales.
fullTest$ingChoiceN <- as.factor(fullTest$ingChoiceN)
fullTest$novel <- as.factor(fullTest$novel)

zCols <- c("selfResp", "SE", "iSE", "oSE", "predicted", "slope",
           "entropy", "WSR", "neighAveOutSE", "neighAveAllSE", "neighAveInSE")
for (v in zCols) {
  fullTest[[paste0(v, ".Z")]] <- scale(fullTest[[v]])
}
```


```{r}
# Relabel the novelty code: "0" = trait seen during training, "1" = held out.
fullTest$novel <- factor(fullTest$novel, levels = c("0", "1"),
                         labels = c("Trained", "Held Out"))
```

```{r}
# Build a wide trait-by-subject matrix of training-phase self-evaluations:
# one row per trait index (1..148), one column "e<subID>" per subject.
# Traits a subject never rated stay NA via the left merge.
evalMat <- data.frame(Idx = 1:148)
for (s in uSubs) {
  subRows <- fullTrain$subID == s
  ratings <- data.frame(fullTrain$Idx[subRows], fullTrain$selfResp[subRows])
  names(ratings) <- c("Idx", paste0("e", s))
  evalMat <- merge(evalMat, ratings, by = "Idx", all.x = TRUE)
}
```

```{r}
# Classical (metric) MDS on trait evaluation profiles: each trait is a row of
# per-subject self-evaluations; embed into 3 dimensions and merge the
# coordinates (columns MDS.1..MDS.3) onto the test data by trait index.
mdsFit <- cmdscale(dist(evalMat[-1]), eig = TRUE, k = 3)
MDSframe <- data.frame(Idx = 1:148, MDS = mdsFit$points)
fullTest <- merge(fullTest, MDSframe, by = "Idx")
```

```{r}
library(magrittr)
library(dplyr)
library(ggpubr)

# Non-metric (Kruskal) MDS, 2-D, on the same trait-by-subject matrix.
# FIX: isoMDS() lives in MASS, which is never attached in this notebook --
# qualify the call so the chunk does not fail with "could not find function".
mds <- evalMat %>%
  select(2:length(.)) %>%
  dist() %>%          
  MASS::isoMDS(k = 2) %>%
  .$points %>%
  as_tibble()
colnames(mds) <- c("Dim.1", "Dim.2")

# Plot MDS
# NOTE(review): `allPosCents` is not defined anywhere in this notebook --
# confirm it exists in the session before relying on these trait labels.
ggscatter(mds, x = "Dim.1", y = "Dim.2", 
          label = allPosCents$trait,
          size = 1,
          repel = TRUE)

# Attach the 2-D coordinates to the test data by trait index.
mds$Idx <- 1:148

fullTest <- merge(fullTest, mds, by = "Idx")
```


```{r}
# Per-subject 1-D scaling of self-evaluations, stacked long (one row per
# subject x rated trait; third column "MDS" is the 1-D coordinate).
# FIX: the original seeded `evalMat` as a 0-row matrix and then assigned
# `evalMat[,2] <- 1:148`, which errors (148 values into 0 cells). It then
# rbind()-ed a matrix whose third column was named "MDS" onto a data frame
# whose third column was named "eval", which rbind() rejects. Build one
# data frame per subject and bind them once instead.
perSub <- vector("list", length(uSubs))
for (k in seq_along(uSubs)) {
  i <- uSubs[k]
  ratings <- fullTrain$selfResp[fullTrain$subID == i]
  present <- which(!is.na(ratings))
  Idx <- fullTrain$Idx[fullTrain$subID == i][present]
  ratings <- ratings[present]
  perSub[[k]] <- data.frame(subID = i,
                            Idx = Idx,
                            MDS = cmdscale(dist(ratings), eig = TRUE, k = 1)$points)
}
evalMat <- do.call(rbind, perSub)
```

```{r}
  
```


```{r}
# prop.test(traitsFreqs$optionChoiceN, traitsFreqs$N, p=rep(.5,length(traitsFreqs$N)))
# 
# m <- glmer( ingChoiceN ~ trait + ( 1 | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
#                                     optCtrl = list(maxfun = 100000)),
#     nAGQ = 1)
# 
# fullTest$trait <- as.factor(fullTest$trait)
# contrasts(fullTest$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ trait, family = binomial,
#           data = fullTest
#          )
# summary(m)
```


```{r}
# traitsFreqs$trait <- as.factor(traitsFreqs$trait)
# contrasts(traitsFreqs$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ 1, family = binomial,
#           data = traitsFreqs
#          )
# summary(m)
# check_overdispersion(m)
# check_model(m)
# 
# m <- glm(optionChoiceN ~ trait, family = quasibinomial,
#           data = traitsFreqs
#          )
# check_overdispersion(m)
# check_model(m)
# 
# t.test()
# 
# m <- glm(optionChoiceN ~ trait, family = poisson,
#           data = traitsFreqs
#          )
# check_overdispersion(m)
# check_model(m)
```

```{r}
# One-sample t-test per trait: does the mean ingroup-choice rate differ from
# chance (.50)? ingChoiceN is a factor; as.numeric() - 1 assumes its levels
# are "0"/"1" so the subtraction recovers 0/1 coding (TODO confirm).
nTraits <- 148
propMatrix <- matrix(nrow = nTraits, ncol = 7)
for (i in seq_len(nTraits)) {
  traitDf <- subset(fullTest, Idx == i)
  tt <- t.test(as.numeric(traitDf$ingChoiceN) - 1, mu = .50)
  propMatrix[i, ] <- c(i, tt$statistic, tt$p.value, tt$conf.int,
                       tt$estimate, tt$parameter)
}
colnames(propMatrix) <- c("Idx", "stat", "p", "LCI", "UCI", "est", "param")
propMatrix <- as.data.frame(propMatrix)
propMatrix$trait <- traitsFreqs$trait
propMatrix <- propMatrix[order(propMatrix$p), ]
```

```{r}
# Correlate group homophily with the other subject-level scales (one row per
# subject in indDiffs) and plot the coefficients as a sorted bar chart.
x <- indDiffs %>% 
    select(groupHomoph, seHomoph, DS:SING.Inter) %>%
  correlate() %>% 
  focus(groupHomoph) %>%
    arrange(groupHomoph)

x %>% 
  mutate(rowname = factor(rowname, levels = rowname[order(groupHomoph)])) %>%  # Order by correlation strength
  ggplot(aes(x = rowname, y = groupHomoph)) +
    geom_bar(stat = "identity") +
    ylab("Correlation Coefficient") +
    # NOTE(review): several theme() calls below override earlier ones (e.g.
    # the second axis.text.x setting replaces angle = 90 with angle = 45);
    # only the last value of each theme element takes effect.
    xlab("Individual Differences") + theme_grey(base_size = 9)  + theme(axis.text.x = element_text(angle = 90,hjust = 1)) +
  theme(axis.text.x = element_text( 
                           size = 9, angle = 45, vjust = 1)) + theme(axis.title.x = element_text(vjust=1.9)) + theme(axis.text=element_text(size=9),
        axis.title=element_text(size=9,face="bold")) + theme(legend.text = element_text(size=9)) + theme(panel.border = element_rect(colour = "black", fill = NA, size =1)) + theme(legend.title = element_blank()) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))
```


# Familiarity predicts Reaction Time

```{r}
# Familiarity predicting log reaction time, with by-subject familiarity
# slopes and a by-trait random intercept.
m <- lmer(log(RT) ~ fam + (fam | subID) + (1 | trait), data = fullTest)
summary(m)
plot(ggpredict(m, terms = "fam"))
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

```{r}
# Same RT model, adjusting for choice-rate (propCorr) and trait desirability.
m <- lmer(log(RT) ~ fam + propCorr + desirability + (fam | subID) + (1 | trait),
          data = fullTest)
summary(m)
plot(ggpredict(m, terms = "fam"))
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```


# Replication of prior self-anchoring findings: Self-evaluations predicting ingroup evaluations

## No covariates

```{r}
# Self-anchoring replication: z-scored self-evaluations predicting the odds
# of an ingroup choice (by-subject random slopes, by-trait random intercept).
m <- glmer( ingChoiceN ~ selfResp.Z +  ( selfResp.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```


## Covariates

```{r}
# Self-anchoring with covariates (propCorr, desirability), plus effect size,
# marginal-effect plot, and figure export.
m <- glmer( ingChoiceN ~ selfResp.Z + propCorr + desirability + ( selfResp.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("selfResp.Z")) %>% plot(show.title=F) + xlab("Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/SelfProjection.tiff",dpi=600)
```

# Does similarity-weighted self-evaluation average predict ingroup choices?

## No covariates

```{r}
# Similarity-weighted self-evaluation average (WSR.Z) predicting ingroup choice.
m <- glmer( ingChoiceN ~ WSR.Z + ( WSR.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```


## Covariates

```{r}
# WSR.Z model with covariates, effect sizes, and tidy fixed-effect output.
m <- glmer( ingChoiceN ~ WSR.Z + propCorr + desirability + ( WSR.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
```
# Does self-evaluation weighted similarity predict ingroup choices?

## No covariates

```{r}
# Self-evaluation-weighted similarity (SE.Z) predicting ingroup choice.
# NOTE(review): unlike sibling models, there is no (1 | trait) term here --
# confirm the by-trait intercept was dropped intentionally.
m <- glmer( ingChoiceN ~ SE.Z + ( SE.Z | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# SE.Z model with covariates. NOTE(review): no (1 | trait) term here either --
# confirm this matches the no-covariate version by design.
m <- glmer( ingChoiceN ~ SE.Z + propCorr + desirability + ( SE.Z | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
```

# Do cross-validated similarity*self-evaluation predictions predict ingroup choices? 

## No covariates

```{r}
# Cross-validated similarity x self-evaluation predictions (predicted.Z)
# predicting ingroup choice.
m <- glmer( ingChoiceN ~ predicted.Z + ( predicted.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

# Covariates

```{r}
# predicted.Z model with covariates, effect size, plot, and figure export.
m <- glmer( ingChoiceN ~ predicted.Z + propCorr + desirability + ( predicted.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("predicted.Z")) %>% plot(show.title=F) + xlab("Cross-Validated Self-Descriptiveness Predictions") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVprediction.tiff",dpi=600)
```
# Do people self-anchor more for higher indegree traits?

## No covariates

```{r}
# Does trait indegree moderate the predicted.Z effect on ingroup choice?
m <- glmer( ingChoiceN ~ predicted.Z * inDegree + ( predicted.Z + inDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# Indegree moderation with covariates, plus marginal-effect plot.
m <- glmer( ingChoiceN ~ predicted.Z * inDegree + propCorr + desirability +  ( predicted.Z + inDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "inDegree")) %>% plot(show.title=F) + xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
```


# Do people self-anchor more for higher outdegree traits?

## No covariates

```{r}
# Does trait outdegree moderate the predicted.Z effect on ingroup choice?
m <- glmer( ingChoiceN ~ predicted.Z * outDegree + ( predicted.Z + outDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# Outdegree moderation with covariates, plus marginal-effect plot.
m <- glmer( ingChoiceN ~ predicted.Z * outDegree + propCorr + desirability +  ( predicted.Z + outDegree | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "outDegree")) %>% plot(show.title=F) + xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
```

# Do cross-validated similarity*self-evaluation predictions predict ingroup choices, regardless of whether it was seen prior or not?

## No covariates

```{r}
# Generalization test: does predicted.Z work equally for trained vs held-out
# traits (predicted.Z x novel interaction)?
m <- glmer( ingChoiceN ~ predicted.Z * novel + ( predicted.Z + novel | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# Generalization model with covariates; plot relabels the novelty legend and
# the figure is written to CVpredictionGeneralize.tiff.
m <- glmer( ingChoiceN ~ predicted.Z * novel + propCorr + desirability + ( predicted.Z + novel | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("predicted.Z", "novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()  + scale_color_discrete(labels = c("Trained","Held-Out"))
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
```

# Does generalization depend on outdegree?

## No covariates

```{r}
# Three-way: does outdegree moderate generalization (predicted.Z x novel)?
# NOTE(review): no by-trait random effect here, unlike the two-way models --
# confirm intentional.
m <- glmer( ingChoiceN ~ predicted.Z * novel * outDegree  + ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# Outdegree-moderated generalization with covariates, plus plot and export.
m <- glmer( ingChoiceN ~ predicted.Z * novel * outDegree  + propCorr + desirability + ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("predicted.Z", "outDegree" ,"novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
# FIX: this chunk previously saved to CVpredictionGeneralize.tiff, silently
# overwriting the figure exported by the predicted.Z * novel chunk above;
# give the outdegree-moderation figure its own file name.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralizeOutdegree.tiff",dpi=600)
```
# Does generalization depend on indegree?

## No covariates

```{r}
# Three-way: does indegree moderate generalization (predicted.Z x novel)?
m <- glmer( ingChoiceN ~ predicted.Z * novel * inDegree + ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# Indegree-moderated generalization with covariates.
m <- glmer( ingChoiceN ~ predicted.Z * novel * inDegree + propCorr + desirability +  ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
# FIX: the plot previously requested "outDegree" (copied from the outdegree
# chunk), but this model's moderator is inDegree -- ggpredict() cannot
# resolve a term that is not in the model.
ggpredict(m, c("predicted.Z", "inDegree" ,"novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
```

# Neighboring Dependencies Predicting Choices

## No covariates

```{r}
# Outward-neighbor average self-evaluation (neighAveOutSE.Z) predicting
# ingroup choice.
m <- glmer( ingChoiceN ~ neighAveOutSE.Z  + ( neighAveOutSE.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
```

## Covariates

```{r}
# neighAveOutSE.Z with covariates, plus effect size, plot, and figure export.
m <- glmer( ingChoiceN ~ neighAveOutSE.Z  + propCorr + desirability + ( neighAveOutSE.Z | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("neighAveOutSE.Z")) %>% plot(show.title=F)+ xlab("Outwards Neighboring Self-Evaluations") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/outdegreeNeighbors.tiff",dpi=600)
```

# Generalization of Outdegree Neighboring Self-Evaluations

## No covariates

```{r}
# Generalization of outward-neighbor self-evaluations (no covariates):
# interaction with trained vs. held-out traits.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z * novel +
    (neighAveOutSE.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
```

## Covariates

```{r}
# Generalization of outward-neighbor self-evaluations with covariates;
# tabulate ORs, effect sizes, and save the interaction figure.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z * novel + propCorr + desirability +
    (neighAveOutSE.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("neighAveOutSE.Z", "novel")) %>%
  plot(show.title = FALSE) +
  xlab("Outwards Neighboring Self-Evaluations") +
  ylab("Likelihood of Ingroup Choice") +
  jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/outdegreeNeighborsGeneralization.tiff", dpi = 600)
```

# Does entropy (i.e., uncertainty) predict likelihood of ingroup choices?

## No covariates

```{r}
# Does uncertainty (entropy) predict likelihood of ingroup choices? No covariates.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ entropy.Z + (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
```

## Covariates

```{r}
# Uncertainty (entropy) with covariates; report ORs, effect sizes, and save
# the marginal-effects figure.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ entropy.Z + propCorr + desirability +
    (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("entropy.Z")) %>%
  plot(show.title = FALSE) +
  xlab("Uncertainty") +
  ylab("Likelihood of Ingroup Choice") +
  jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/Uncertainty.tiff", dpi = 600)
```

# Does a linear trend of similarity-based probabilities predict ingroup choices?

## No covariates

```{r}
# Does a linear trend of similarity-based probabilities predict ingroup
# choices? No covariates.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ slope.Z + (slope.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
```

## Covariates

```{r}
# Linear similarity trend with covariates; tabulate ORs and save the
# marginal-effects figure.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ slope.Z + propCorr + desirability +
    (slope.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
ggpredict(m, c("slope.Z")) %>%
  plot(show.title = FALSE) +
  xlab("Linear Trend of Greater Self-Descriptiveness") +
  ylab("Likelihood of Ingroup Choice") +
  jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/Slope.tiff", dpi = 600)
```

# Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

```{r}
# Linear similarity trend predicting ingroup choices, controlling for
# self-descriptiveness.
# CONSISTENCY FIX: use the precomputed slope.Z (defined above as
# scale(fullTest$slope)) instead of re-scaling inline; the fitted model is
# numerically identical, and the coefficient labels now match the other
# slope.Z chunks in this file.
m <- glmer(
  ingChoiceN ~ slope.Z + selfResp.Z +
    (slope.Z + selfResp.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

# Does a non-parametric trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

```{r}
# Non-parametric similarity trend (nlslope) predicting ingroup choices,
# controlling for self-descriptiveness.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ scale(nlslope) + selfResp.Z +
    (scale(nlslope) + selfResp.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

# Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for cross-validated self-descriptiveness?

```{r}
# Linear similarity trend alongside cross-validated self-descriptiveness
# (both standardized) predicting ingroup choices.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ slope.Z + predicted.Z +
    (slope.Z + predicted.Z | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

# Backwards solution: Can you predict self-evaluations from similarity to ingroup and outgroup choices?

```{r}
# Backwards solution: predict (standardized) self-evaluations from similarity
# to ingroup and outgroup choices on the training data.
m <- lmer(
  scale(selfResp) ~ scale(inGsim) + scale(outGsim) +
    (scale(inGsim) + scale(outGsim) | subID) + (1 | trait),
  data = fullTrain
)
summary(m)
tidy(m, conf.int = TRUE, effects = "fixed")
ggpredict(m, c("inGsim")) %>%
  plot(show.title = FALSE) +
  xlab("Similarity to Ingroup Choices") +
  ylab("Self-Evaluation") +
  jtools::theme_apa()
```

```{r}
# Does the linear similarity trend generalize to held-out traits?
# CONSISTENCY FIX: slope.Z (precomputed above as scale(fullTest$slope))
# replaces the inline scale(slope); identical values, consistent labeling.
m <- glmer(
  ingChoiceN ~ slope.Z * novel + (slope.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

```{r}
# Ingroup choice predicted by eSE alone (subject random slope only; no
# trait-level random intercept in this specification).
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ eSE + (eSE | subID),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
```


```{r}
# Ingroup choice predicted by sSE, with subject random slope and trait
# random intercept; report ORs and effect sizes.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ sSE + (sSE | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
r2beta(m)
```

```{r}
# Does the SE effect generalize? SE.Z x novelty interaction with subject
# random slopes and trait random intercept.
bobyqa_ctrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))
m <- glmer(
  ingChoiceN ~ SE.Z * novel + (SE.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial, control = bobyqa_ctrl, nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
```


```{r}
# Battery of self-evaluation (SE) models, each refitting `m`.

# SE.Z controlling for desirability.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z + scale(desirability) + ( SE.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)

# oSE variant of self-evaluation.
m <- glmer( as.factor(ingChoiceN) ~ scale(oSE) + ( scale(oSE) | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)

# iSE variant of self-evaluation.
m <- glmer( as.factor(ingChoiceN) ~ scale(iSE) + ( scale(iSE) | subID) + (1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)

# SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel) + ( SE.Z + as.factor(novel) | subID) + (  1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)

# SE.Z x novelty, controlling for desirability (with desirability random slope).
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel) + scale(desirability) + ( SE.Z + as.factor(novel) + scale(desirability) | subID) + (  1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: "SE" is not a term in this model — the predictor is SE.Z, so
# ggpredict could not resolve the requested term. Plot the actual term.
ggpredict(m, c("SE.Z", "novel")) %>% plot()
```

```{r}
# Individual-difference moderation battery: does each trait-level moderator
# interact with cross-validated self-descriptiveness (predicted.Z) in
# predicting ingroup choices? Each model refits `m`, prints its summary, and
# plots the simple slopes via ggpredict.
# NOTE(review): the final (NTB) model uses a predicted.Z random slope by
# trait, unlike the (1 | trait) intercept used elsewhere — confirm intended.

# Moderator: RSE.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(RSE) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "RSE")) %>% plot()

# Moderator: SCC.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SCC) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "SCC")) %>% plot()

# Moderator: DS.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(DS) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "DS")) %>% plot()

# Moderator: NFC.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(NFC) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "NFC")) %>% plot()

# Moderator: SING.Ind.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SING.Ind) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "SING.Ind")) %>% plot()

# Moderator: SING.Inter.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SING.Inter) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "SING.Inter")) %>% plot()

# Moderator: Proto.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(Proto) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "Proto")) %>% plot()

# Moderator: SI.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SI) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "SI")) %>% plot()

# Moderator: NTB (note the predicted.Z random slope by trait here).
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(NTB) + ( predicted.Z | subID) + ( predicted.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("predicted.Z", "NTB")) %>% plot()
```

```{r}
# Individual-difference moderation battery for uncertainty: does each
# trait-level moderator interact with entropy.Z in predicting ingroup
# choices? Each model refits `m`, prints its summary, and plots the
# interaction via ggpredict.
# NOTE(review): the final (NTB) model uses an entropy.Z random slope by
# trait, unlike the (1 | trait) intercept used elsewhere — confirm intended.

# Moderator: RSE.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(RSE) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "RSE")) %>% plot()

# Moderator: SCC.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SCC) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "SCC")) %>% plot()

# Moderator: DS.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(DS) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "DS")) %>% plot()

# Moderator: NFC.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(NFC) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "NFC")) %>% plot()

# Moderator: SING.Ind.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SING.Ind) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "SING.Ind")) %>% plot()

# Moderator: SING.Inter.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SING.Inter) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "SING.Inter")) %>% plot()

# Moderator: Proto.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(Proto) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "Proto")) %>% plot()

# Moderator: SI.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SI) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "SI")) %>% plot()

# Moderator: NTB (note the entropy.Z random slope by trait here).
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(NTB) + ( entropy.Z | subID) + ( entropy.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("entropy.Z", "NTB")) %>% plot()
```

```{r}
# Individual-difference moderation battery for the linear similarity trend:
# does each trait-level moderator interact with slope.Z in predicting
# ingroup choices? Each model refits `m`, prints its summary, and plots the
# interaction via ggpredict.
# NOTE(review): the final (NTB) model uses a slope.Z random slope by trait,
# unlike the (1 | trait) intercept used elsewhere — confirm intended.

# Moderator: RSE.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(RSE) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "RSE")) %>% plot()

# Moderator: SCC.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SCC) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "SCC")) %>% plot()

# Moderator: DS.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(DS) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "DS")) %>% plot()

# Moderator: NFC.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(NFC) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "NFC")) %>% plot()

# Moderator: SING.Ind.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SING.Ind) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "SING.Ind")) %>% plot()

# Moderator: SING.Inter.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SING.Inter) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "SING.Inter")) %>% plot()

# Moderator: Proto.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(Proto) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "Proto")) %>% plot()

# Moderator: SI.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SI) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "SI")) %>% plot()

# Moderator: NTB (note the slope.Z random slope by trait here).
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(NTB) + ( slope.Z | subID) + ( slope.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("slope.Z", "NTB")) %>% plot()
```

```{r}
# Individual-difference moderation battery for trait desirability: does each
# trait-level moderator interact with scaled desirability in predicting
# ingroup choices? Unlike the batteries above, every model here also carries
# a desirability random slope by trait. Each model refits `m`, prints its
# summary, and plots the interaction via ggpredict.

# Moderator: RSE.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(RSE) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "RSE")) %>% plot()

# Moderator: SCC.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SCC) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "SCC")) %>% plot()

# Moderator: DS.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(DS) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "DS")) %>% plot()

# Moderator: NFC.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(NFC) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "NFC")) %>% plot()

# Moderator: SING.Ind.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SING.Ind) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "SING.Ind")) %>% plot()

# Moderator: SING.Inter.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SING.Inter) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "SING.Inter")) %>% plot()

# Moderator: Proto.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(Proto) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "Proto")) %>% plot()

# Moderator: SI.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SI) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "SI")) %>% plot()

# Moderator: NTB.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(NTB) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "NTB")) %>% plot()
```

```{r}
# NFC moderation of generalization: predicted.Z x novel x NFC.
# NOTE(review): the trait-level random effect ( SE.Z*as.factor(novel) | trait )
# does not involve the fixed-effect predictors (predicted.Z, novel) and looks
# like a copy-paste remnant from the SE.Z chunks that follow — confirm the
# intended random-effects specification before interpreting.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*novel*scale(NFC) + ( predicted.Z+novel | subID) + ( SE.Z*as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the plot previously requested c("SE", "novel", "RSE"); neither SE
# nor RSE is a term in this model. Plot the actual model terms.
ggpredict(m, c("predicted.Z", "novel", "NFC")) %>% plot()
```

```{r}
# SCC moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SCC) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "SCC")) %>% plot()
```

```{r}
# DS moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(DS) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "DS")) %>% plot()
```

```{r}
# NFC moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(NFC) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "NFC")) %>% plot()
```

```{r}
# SING.Ind moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SING.Ind) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "SING.Ind")) %>% plot()
```

```{r}
# SING.Inter moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SING.Inter) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "SING.Inter")) %>% plot()
```

```{r}
# Proto moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(Proto) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; also add "novel" so the plot shows
# the full three-way interaction, consistent with the sibling chunks.
ggpredict(m, c("SE.Z", "novel", "Proto")) %>% plot()
```

```{r}
# SI moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SI) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "SI")) %>% plot()
```

```{r}
# NTB moderation of the SE.Z x novelty interaction.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(NTB) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# BUG FIX: the model term is SE.Z, not SE; "SE" cannot be resolved by ggpredict.
ggpredict(m, c("SE.Z", "novel", "NTB")) %>% plot()
```


